//#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
+#include <linux/vmalloc.h>
#include <asm/page.h>
#include <asm/hypervisor.h>
#include <asm/hypercall.h>
// Per-page bookkeeping for one pseudo-physical page mapped through privcmd.
struct xen_ia64_privcmd_entry {
atomic_t map_count; // number of vmas currently mapping this entry
struct page* page; // backing page (alloc_page'd); NULL while unmapped
// this patch removes the cached machine frame number ('-' line below)
- unsigned long mfn;
};
static void
{
atomic_set(&entry->map_count, 0);
entry->page = NULL;
- entry->mfn = INVALID_MFN;
}
+//TODO: allocating pseudo-physical address space with alloc_page() wastes
+// memory. When a VTI domain is created, qemu maps all of the VTI
+// domain's pages, which amounts to at least several hundred megabytes.
+// Remove alloc_page().
static int
xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
unsigned long addr,
} else {
atomic_inc(&entry->map_count);
entry->page = page;
- entry->mfn = mfn;
}
out:
}
entry->page = NULL;
- entry->mfn = INVALID_MFN;
__free_page(page);
}
}
}
- struct xen_ia64_privcmd_file {
- struct file* file;
- atomic_t map_count;
// This patch replaces the per-file struct with a refcounted range object:
// one per mmap()ed region, shared by every vma split from that region.
+struct xen_ia64_privcmd_range {
+ atomic_t ref_count; // vmas referencing this range; vfree'd when it drops to 0
unsigned long pgoff; // in PAGE_SIZE
unsigned long num_entries;
// NOTE(review): the trailing 'entries[]' array member is referenced by the
// code (privcmd_range->entries) but its unchanged declaration line was
// omitted from this diff chunk — confirm against the full source.
};
// Per-vma view into a shared privcmd range: a window of 'num_entries'
// entries starting at this vma's page offset within the range.
struct xen_ia64_privcmd_vma {
- struct xen_ia64_privcmd_file* file;
+ struct xen_ia64_privcmd_range* range; // shared, refcounted backing range
+
unsigned long num_entries; // pages covered by this vma
struct xen_ia64_privcmd_entry* entries; // points into the range's entry array
};
static void
__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
- struct xen_ia64_privcmd_vma* privcmd_vma)
+ struct xen_ia64_privcmd_vma* privcmd_vma,
+ struct xen_ia64_privcmd_range* privcmd_range)
{
- struct xen_ia64_privcmd_file* privcmd_file =
- (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
- unsigned long entry_offset = vma->vm_pgoff - privcmd_file->pgoff;
+ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long i;
BUG_ON(entry_offset < 0);
- BUG_ON(entry_offset + num_entries > privcmd_file->num_entries);
+ BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
- privcmd_vma->file = privcmd_file;
+ privcmd_vma->range = privcmd_range;
privcmd_vma->num_entries = num_entries;
- privcmd_vma->entries = &privcmd_file->entries[entry_offset];
+ privcmd_vma->entries = &privcmd_range->entries[entry_offset];
vma->vm_private_data = privcmd_vma;
for (i = 0; i < privcmd_vma->num_entries; i++) {
xen_ia64_privcmd_entry_open(&privcmd_vma->entries[i]);
// vm_operations_struct .open hook, invoked when a privcmd vma is duplicated
// (e.g. on split). The new vma inherits vm_private_data from its parent, so
// the parent's per-vma struct is used to find the shared range, the range's
// refcount is bumped, and a fresh per-vma struct is installed via
// __xen_ia64_privcmd_vma_open().
static void
xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
{
- struct xen_ia64_privcmd_file* privcmd_file =
- (struct xen_ia64_privcmd_file*)vma->vm_file->private_data;
- struct xen_ia64_privcmd_vma* privcmd_vma;
+ struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
+ struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
- atomic_inc(&privcmd_file->map_count);
+ atomic_inc(&privcmd_range->ref_count);
// vm_op->open() can't fail.
// __GFP_NOFAIL because ->open() returns void and has no way to report OOM.
privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
- __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
}
static void
{
struct xen_ia64_privcmd_vma* privcmd_vma =
(struct xen_ia64_privcmd_vma*)vma->vm_private_data;
- struct xen_ia64_privcmd_file* privcmd_file = privcmd_vma->file;
+ struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
unsigned long i;
for (i = 0; i < privcmd_vma->num_entries; i++) {
vma->vm_private_data = NULL;
kfree(privcmd_vma);
- if (atomic_dec_and_test(&privcmd_file->map_count)) {
+ if (atomic_dec_and_test(&privcmd_range->ref_count)) {
#if 1
- for (i = 0; i < privcmd_file->num_entries; i++) {
+ for (i = 0; i < privcmd_range->num_entries; i++) {
struct xen_ia64_privcmd_entry* entry =
- &privcmd_vma->entries[i];
+ &privcmd_range->entries[i];
BUG_ON(atomic_read(&entry->map_count) != 0);
BUG_ON(entry->page != NULL);
}
#endif
- privcmd_file->file->private_data = NULL;
- kfree(privcmd_file->file->private_data);
+ vfree(privcmd_range);
}
}
privcmd_mmap(struct file * file, struct vm_area_struct * vma)
{
unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
- struct xen_ia64_privcmd_file* privcmd_file;
+ struct xen_ia64_privcmd_range* privcmd_range;
struct xen_ia64_privcmd_vma* privcmd_vma;
unsigned long i;
BUG_ON(!running_on_xen);
- /* DONTCOPY is essential for Xen as copy_page_range is broken. */
- vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
-
- if (file->private_data != NULL) {
- return -EBUSY;
- }
-
- privcmd_file = kmalloc(sizeof(*privcmd_file) +
- sizeof(privcmd_file->entries[0]) * num_entries,
- GFP_KERNEL);
- if (privcmd_file == NULL) {
+ BUG_ON(file->private_data != NULL);
+ privcmd_range =
+ vmalloc(sizeof(*privcmd_range) +
+ sizeof(privcmd_range->entries[0]) * num_entries);
+ if (privcmd_range == NULL) {
goto out_enomem0;
}
privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
goto out_enomem1;
}
- atomic_set(&privcmd_file->map_count, 1);
- privcmd_file->num_entries = num_entries;
- for (i = 0; i < privcmd_file->num_entries; i++) {
- xen_ia64_privcmd_init_entry(&privcmd_file->entries[i]);
+ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
+ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
+
+ atomic_set(&privcmd_range->ref_count, 1);
+ privcmd_range->pgoff = vma->vm_pgoff;
+ privcmd_range->num_entries = num_entries;
+ for (i = 0; i < privcmd_range->num_entries; i++) {
+ xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
}
- file->private_data = privcmd_file;
- privcmd_file->file = file;
- privcmd_file->pgoff = vma->vm_pgoff;
- __xen_ia64_privcmd_vma_open(vma, privcmd_vma);
+ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
return 0;
out_enomem1:
kfree(privcmd_vma);
out_enomem0:
- kfree(privcmd_file);
+ vfree(privcmd_range);
return -ENOMEM;
}
i = (address - vma->vm_start) >> PAGE_SHIFT;
for (offset = 0; offset < size; offset += PAGE_SIZE) {
struct xen_ia64_privcmd_entry* entry =
- &privcmd_vma->file->entries[i];
+ &privcmd_vma->entries[i];
error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, entry, mfn, prot, domid);
if (error != 0) {
break;